From 1a0e8a8613ae0170561918bc783963a4981b5162 Mon Sep 17 00:00:00 2001
From: "kaf24@freefall.cl.cam.ac.uk" <kaf24@freefall.cl.cam.ac.uk>
Date: Tue, 21 Sep 2004 17:39:18 +0000
Subject: [PATCH] bitkeeper revision 1.1159.79.11
 (41506746mdOECqqSuCdLRNy-BA_bQQ)

A couple more network fixes.
---
 .../drivers/xen/netback/netback.c   |  6 ++----
 .../drivers/xen/netfront/netfront.c | 19 ++++++++++++++-----
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
index f31e30afa5..ca12459175 100644
--- a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
+++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c
@@ -138,7 +138,6 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
      * We do not copy the packet unless:
      *  1. The data is shared; or
      *  2. The data is not allocated from our special cache.
-     * The copying method is taken from skb_copy().
      * NB. We also couldn't cope with fragmented packets, but we won't get
      * any because we not advertise the NETIF_F_SG feature.
      */
@@ -148,10 +147,9 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
         if ( unlikely(nskb == NULL) )
             goto drop;
-        /* Account for any reservation already made by dev_alloc_skb(). */
-        skb_reserve(nskb, hlen - (nskb->data - nskb->head));
+        skb_reserve(nskb, hlen);
         __skb_put(nskb, skb->len);
-        (void)skb_copy_bits(skb, -hlen, nskb->head, hlen + skb->len);
+        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
         nskb->dev = skb->dev;
         dev_kfree_skb(skb);
         skb = nskb;
diff --git a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c
index 15724f5cf4..bc3d236326 100644
--- a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c
@@ -377,13 +377,14 @@ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
     if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
                   PAGE_SIZE) )
     {
-        struct sk_buff *new_skb;
-        if ( unlikely((new_skb = alloc_xen_skb(skb->len)) == NULL) )
+        struct sk_buff *nskb;
+        if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
             goto drop;
-        skb_put(new_skb, skb->len);
-        memcpy(new_skb->data, skb->data, skb->len);
+        skb_put(nskb, skb->len);
+        memcpy(nskb->data, skb->data, skb->len);
+        nskb->dev = skb->dev;
         dev_kfree_skb(skb);
-        skb = new_skb;
+        skb = nskb;
     }
 
     spin_lock_irq(&np->tx_lock);
@@ -553,13 +554,21 @@ static int netif_poll(struct net_device *dev, int *pbudget)
         /* Only copy the packet if it fits in the current MTU. */
         if ( skb->len <= (dev->mtu + ETH_HLEN) )
         {
+            if ( (skb->tail > skb->end) && net_ratelimit() )
+                printk(KERN_INFO "Received packet needs %d bytes more "
+                       "headroom.\n", skb->tail - skb->end);
+
             if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
             {
                 skb_reserve(nskb, 2);
                 skb_put(nskb, skb->len);
                 memcpy(nskb->data, skb->data, skb->len);
+                nskb->dev = skb->dev;
             }
         }
+        else if ( net_ratelimit() )
+            printk(KERN_INFO "Received packet too big for MTU "
+                   "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
 
         /* Reinitialise and then destroy the old skbuff. */
         skb->len = 0;
-- 
2.30.2